}
}
-int shadow_fault(unsigned long va, long error_code)
+int shadow_fault(unsigned long va, struct xen_regs *regs)
{
unsigned long gpte, spte = 0;
struct exec_domain *ed = current;
struct domain *d = ed->domain;
- SH_VVLOG("shadow_fault( va=%p, code=%lu )", va, error_code );
+ SH_VVLOG("shadow_fault( va=%p, code=%lu )", va, regs->error_code );
check_pagetable(d, ed->arch.guest_table, "pre-sf");
return 0;
}
- if ( (error_code & 2) && !(gpte & _PAGE_RW) )
+ if ( (regs->error_code & 2) && !(gpte & _PAGE_RW) )
{
/* Write fault on a read-only mapping. */
return 0;
}
/* Write fault? */
- if ( error_code & 2 )
+ if ( regs->error_code & 2 )
{
if ( unlikely(!(gpte & _PAGE_RW)) )
{
ptwr_do_page_fault(addr) )
{
if ( unlikely(shadow_mode_enabled(d)) )
- (void)shadow_fault(addr, regs->error_code);
+ (void)shadow_fault(addr, regs);
UNLOCK_BIGLOCK(d);
return EXCRET_fault_fixed;
}
}
if ( unlikely(shadow_mode_enabled(d)) &&
- (addr < PAGE_OFFSET) && shadow_fault(addr, regs->error_code) )
+ (addr < PAGE_OFFSET) && shadow_fault(addr, regs) )
return EXCRET_fault_fixed;
if ( unlikely(addr >= LDT_VIRT_START(ed)) &&
#include <asm/domain_page.h>
-static int vmx_do_page_fault(unsigned long va, unsigned long error_code)
+static int vmx_do_page_fault(unsigned long va, struct xen_regs *regs)
{
unsigned long eip;
unsigned long gpa;
{
__vmread(GUEST_EIP, &eip);
VMX_DBG_LOG(DBG_LEVEL_VMMU,
- "vmx_do_page_fault = 0x%lx, eip = %lx, erro_code = %lx",
- va, eip, error_code);
+ "vmx_do_page_fault = 0x%lx, eip = %lx, error_code = %lx",
+ va, eip, regs->error_code);
}
#endif
if (mmio_space(gpa))
handle_mmio(va, gpa);
- if ((result = shadow_fault(va, error_code)))
+ if ((result = shadow_fault(va, regs)))
return result;
return 0; /* failed to resolve, i.e raise #PG */
int error;
unsigned int vector;
unsigned long va;
- unsigned long error_code;
if ((error = __vmread(VM_EXIT_INTR_INFO, &vector))
&& !(vector & INTR_INFO_VALID_MASK))
case TRAP_page_fault:
{
__vmread(EXIT_QUALIFICATION, &va);
- __vmread(VM_EXIT_INTR_ERROR_CODE, &error_code);
+ __vmread(VM_EXIT_INTR_ERROR_CODE, &regs.error_code);
VMX_DBG_LOG(DBG_LEVEL_VMMU,
"eax=%lx, ebx=%lx, ecx=%lx, edx=%lx, esi=%lx, edi=%lx",
regs.eax, regs.ebx, regs.ecx, regs.edx, regs.esi,
regs.edi);
ed->arch.arch_vmx.vmx_platform.mpci.inst_decoder_regs = &regs;
- if (!(error = vmx_do_page_fault(va, error_code))) {
+ if (!(error = vmx_do_page_fault(va, &regs))) {
/*
* Inject #PG using Interruption-Information Fields
*/
INTR_INFO_DELIEVER_CODE_MASK |
TRAP_page_fault);
__vmwrite(VM_ENTRY_INTR_INFO_FIELD, intr_fields);
- __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, error_code);
+ __vmwrite(VM_ENTRY_EXCEPTION_ERROR_CODE, regs.error_code);
ed->arch.arch_vmx.cpu_cr2 = va;
}
break;
extern void shadow_mode_init(void);
extern int shadow_mode_control(struct domain *p, dom0_shadow_control_t *sc);
-extern int shadow_fault(unsigned long va, long error_code);
+extern int shadow_fault(unsigned long va, struct xen_regs *regs);
extern void shadow_l1_normal_pt_update(
unsigned long pa, unsigned long gpte,
unsigned long *prev_spfn_ptr, l1_pgentry_t **prev_spl1e_ptr);